In [ ]:
%matplotlib inline
import pandas as pd
import zipfile
import shutil
import urllib2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as dates
from urllib2 import urlopen
import time
from bs4 import BeautifulSoup
from pylab import rcParams
import platform
import sys
rcParams['figure.figsize'] = 15, 10
import re
import os
import glob
import urllib
import gzip
import ftplib
import calendar
import datetime
from datetime import date
import pymodis
In [ ]:
import arcpy
arcpy.CheckOutExtension("spatial")
from arcpy.sa import *
In [ ]:
print("Operating System " + platform.system() + " " + platform.release())
print("Python Version " + str(sys.version))
print("Pandas Version " + str(pd.__version__))
print("Numpy Version " + str(np.__version__))
In [ ]:
import UBM
UBM.__version__
Potential and Actual Evapotranspiration
In [ ]:
#Download HDF Files
tiles = ['h09v05','h09v04','h08v05'] # the three MODIS tiles that cover Utah
save_path = 'H:/GIS/MODIS/'
In [ ]:
UBM.get_modis(tiles, save_path)
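UBM.get_modis wraps the download step; for reference, a minimal sketch of a comparable download with the pymodis package (imported above) follows. The product code, date, and delta below are illustrative assumptions, and the actual UBM.get_modis may instead pull MODIS16 from the NTSG FTP server via ftplib.
In [ ]:
# Hedged sketch only; the product/date values are assumptions, not UBM's.
from pymodis import downmodis
dm = downmodis.downModis(destinationFolder=save_path,
                         tiles=','.join(tiles),  # 'h09v05,h09v04,h08v05'
                         product='MOD16A2.105',  # assumed product code
                         today='2015-12-31',     # assumed end date
                         delta=30)               # days to look back
dm.connect()
dm.downloadsAllDay()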
In [ ]:
files = UBM.get_file_list(save_path)
These scripts reproject the HDF files from the native sinusoidal MODIS projection to NAD83 UTM Zone 12 for analysis. In this section, the MODIS rasters are also clipped to Utah watersheds (see image below) and the fill values are converted to null (NoData) values. Fill values are described in the MODIS16 documentation:
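For reference, a rough sketch of the kind of arcpy processing UBM.reproject_modis and UBM.clip_and_fix perform on each file; the subdataset index, placeholder names, clip features, and the 32700 fill-value threshold are assumptions here, so check the MODIS16 documentation for the actual fill codes.
In [ ]:
# Hedged sketch; not the UBM source. hdf_file, out_rast, and clip_fc are placeholders.
def reproject_and_fix(hdf_file, out_rast, clip_fc):
    # pull the ET band out of the HDF container (subdataset 0 is an assumption)
    arcpy.ExtractSubDataset_management(hdf_file, 'in_memory/et_band', '0')
    # reproject from the MODIS sinusoidal projection to NAD83 UTM Zone 12N
    sr = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
    arcpy.ProjectRaster_management('in_memory/et_band', out_rast, sr)
    # clip to the watershed boundaries and null out fill values above the valid range
    clipped = arcpy.sa.ExtractByMask(out_rast, clip_fc)
    return arcpy.sa.SetNull(clipped > 32700, clipped)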
In [ ]:
save_path = 'H:/GIS/MODIS/'
UBM.reproject_modis(files,save_path,'ET')
In [ ]:
path = "H:/GIS/MODIS/ET/"
outpath="H:/GIS/MODIS16.gdb/"
UBM.clip_and_fix(path,outpath,'ET')
In [ ]:
save_path = "H:/GIS/MODIS/PET/"
UBM.reproject_modis(files,save_path,'PET')
In [ ]:
path = "H:/GIS/MODIS/PET/"
outpath="H:/GIS/MODIS16.gdb/"
UBM.clip_and_fix(path,outpath,'PET')
MODIS data are downloaded as separate tiles based on the sinusoidal grid. Three MODIS tiles cover Utah ('h09v05','h09v04','h08v05'). The following scripts mosaic (merge) the three individual rasters for each month into one seamless monthly raster for the entire state.
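A minimal sketch of one month's mosaic with the standard arcpy tool, assuming a raster naming convention of data type plus YYYYMM plus tile code; UBM.merge_rasts handles the looping and naming internally.
In [ ]:
# Hedged sketch of a single month's merge; raster names are assumptions.
arcpy.env.workspace = "H:/GIS/MODIS16.gdb"
monthly_tiles = arcpy.ListRasters('ET200001*')  # the three tile rasters for Jan 2000
arcpy.MosaicToNewRaster_management(monthly_tiles, "H:/GIS/MODIS16.gdb", 'ET200001',
                                   number_of_bands=1, pixel_type='16_BIT_SIGNED')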
In [ ]:
path="H:/GIS/MODIS16.gdb/"
UBM.merge_rasts(path,data_type='ET',monthRange=[1,12],yearRange=[2000,2015])
In [ ]:
path="H:/GIS/MODIS16.gdb/"
UBM.merge_rasts(path,data_type='PET',monthRange=[1,12],yearRange=[2000,2015])
The documentation for the dataset says that the raw files have to be multiplied by 0.1 to get mm/month. To convert to meters per month, we have to multiply the raw values by 0.0001 (0.1 x 0.001), which is the same as dividing by 10,000. These scripts do that division.
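The core of UBM.scale_modis is presumably a single map-algebra division like the sketch below (the raster name is an assumption):
In [ ]:
# Hedged sketch: convert one raw MODIS16 raster to m/month.
raw = arcpy.sa.Raster("H:/GIS/MODIS16.gdb/ET200001")  # assumed name
scaled = raw / 10000.0  # 0.1 (raw to mm/month) x 0.001 (mm to m)
scaled.save("H:/GIS/MODIS16.gdb/ET200001m")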
In [ ]:
path="H:/GIS/MODIS16.gdb/"
arcpy.env.workspace = path
scalingFactor = 10000.0 #convert to m/month
out_path="H:/GIS/MODIS16.gdb/"
UBM.scale_modis(path, out_path, scaleby = 10000.0, data_type = 'ET')
UBM.scale_modis(path, out_path, scaleby = 10000.0, data_type = 'PET')
This deletes intermediate files left over from previous processing steps. For whatever reason, it takes a while.
In [ ]:
path="H:/GIS/MODIS16/"
arcpy.env.workspace = path
for rast in arcpy.ListRasters('*c'):
print(rast)
arcpy.Delete_management(rast, 'RasterDataset')
for rast in arcpy.ListRasters('*h*v*'):
print(rast)
arcpy.Delete_management(rast, 'RasterDataset')
This processing step fills in null values created when the fill values were removed above.
In [ ]:
path= "H:/GIS/MODIS2/MODIS.gdb/"
outpath = "H:/GIS/MODIS2/MODIS16.gdb/"
units = 'CELL'
radius = 15
arcpy.env.workspace = path
def fill_holes(path, outpath, wildcard, units='CELL', radius=15):
    for rast in arcpy.ListRasters(wildcard):
        # where a cell is null, fill it with the mean of a surrounding circle of cells
        rfilled = arcpy.sa.Con(arcpy.sa.IsNull(rast),
                               arcpy.sa.FocalStatistics(rast,
                                   arcpy.sa.NbrCircle(radius, units), 'MEAN'), rast)
        dsc = arcpy.Describe(rast)
        nm = dsc.baseName
        rfilled.save(outpath + nm)
        print(nm)
Data were downloaded using Polaris: http://nsidc.org/data/polaris/
Alternatively, you can download from the FTP server, which is slower:
ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02158/
https://support.nsidc.org/entries/64231694-FTP-Client-Data-Access
In [ ]:
save_path = u'H:/GIS/SNODAS'
UBM.get_snodas(save_path)
In [ ]:
#https://github.com/PSU-CSAR/SNODAS-SWE/blob/master/snodas-swe-prep.py
Extract the downloaded tar files into the SNODASUNZIPPED folder.
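The loop below calls untar, which is not defined in this notebook; a minimal definition using the standard-library tarfile module (an assumption about what the original helper did) is:
In [ ]:
import tarfile

def untar(fname, out_dir):
    # extract every member of the tar archive into out_dir
    with tarfile.open(fname) as tar:
        tar.extractall(out_dir)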
In [ ]:
snodasTars = glob.glob(u'H:/GIS/SNODAS/*.tar')
for tared in snodasTars:
    untar(tared, u'H:/GIS/SNODAS/SNODASUNZIPPED/')
Decompress the .gz files, then delete the source .gz files.
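ungz is likewise undefined here; a minimal sketch using the gzip, shutil, and os modules imported at the top, matching the call signature used below:
In [ ]:
def ungz(fname, deletesource=False):
    # decompress file.gz to file, optionally deleting the .gz original
    with gzip.open(fname, 'rb') as f_in, open(fname[:-3], 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    if deletesource:
        os.remove(fname)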
In [ ]:
unsnodasTars = glob.glob(u'H:/GIS/SNODAS/SNODASUNZIPPED/*.gz')
for tared in unsnodasTars:
    ungz(tared, deletesource=True)
Replace each header file to make the rasters compatible with ArcGIS.
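replace_hdr_file is also not defined in this notebook. The usual fix is to overwrite each .Hdr with an ESRI-style BIL header; the grid values below are the commonly cited ones for the masked CONUS SNODAS product, but they are an assumption here and should be verified against the SNODAS documentation.
In [ ]:
# Hedged sketch; header values assumed for the masked CONUS SNODAS grid.
SNODAS_HDR = """nbands 1
nrows 3351
ncols 6935
nbits 16
pixeltype signedint
byteorder M
layout bil
ulxmap -124.729583333333
ulymap 52.8704166666666
xdim 0.00833333333333333
ydim 0.00833333333333333
nodata -9999
"""

def replace_hdr_file(hdrfile):
    # overwrite the SNODAS header with an ArcGIS-readable BIL header
    with open(hdrfile, 'w') as f:
        f.write(SNODAS_HDR)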
In [ ]:
for hdrfile in glob.glob("H:/GIS/SNODAS/SNODASUNZIPPED/*.Hdr"):
    replace_hdr_file(hdrfile)
Data were downloaded from http://nsidc.org/data/polaris/, which allows clipping to an area of interest as well as output in GeoTIFF format. The download takes about 3 days and the unzipping takes about 2 hours.
Extent:
Selected Variables:
Snow Melt Runoff at the Base of the Snow Pack, Sublimation of Blowing Snow, Snow Pack Average Temperature, Sublimation from the Snow Pack, Snow Water Equivalent, Liquid Precipitation, Snow Depth, Solid Precipitation
After unzipping the files, rename them to make them easier to handle.
From http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/, the file abbreviations are as follows:
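The abbreviations used for the renamed files in the rest of this notebook correspond to the selected variables listed above; the mapping below is reconstructed from those names and the codes used in later cells.
In [ ]:
# Reference mapping of the SNODAS file codes used in this notebook (reconstructed)
snodas_codes = {'RAIN': 'Liquid Precipitation',
                'SNOW': 'Solid Precipitation',
                'SWEQ': 'Snow Water Equivalent',
                'SNML': 'Snow Melt Runoff at the Base of the Snow Pack',
                'BSSB': 'Sublimation of Blowing Snow',
                'SPSB': 'Sublimation from the Snow Pack'}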
In [ ]:
path = "C:/Users/PAULINKENBRANDT/Downloads/NSIDC_Data"
UBM.rename_polaris_snodas(path)
This merge uses the GeoTIFF files from Polaris.
It includes snow and rain, which are provided as separate data sets in SNODAS.
This is a measure of the cumulative incoming rain over a month.
Units are meters of water per month.
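UBM.snow_summary aggregates the daily SNODAS rasters into a monthly raster and rescales the units; below is a minimal sketch of one month, using CellStatistics the same way the yearly summaries at the end of this notebook do. The daily naming pattern and the point where the scale factor is applied are assumptions.
In [ ]:
# Hedged sketch of one month's rain summary; UBM.snow_summary loops over all months.
arcpy.env.workspace = "H:/GIS/SNODAS.gdb"
daily = arcpy.ListRasters('RAIN201501*')  # assumed daily naming convention
monthly = CellStatistics(daily, statistics_type='SUM', ignore_nodata='DATA')
monthly = monthly / 10000.0  # the scale factor passed to snow_summary
monthly.save('RAIN201501SUM')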
In [ ]:
UBM.snow_summary('RAIN',10000.0, monthRange = [1,12], yearRange = [2015,2015])
This represents the "dry" precipitation that falls to the ground as snow.
We summed daily data to create cumulative monthly snowfall.
Units are in meters of water per month.
In [ ]:
UBM.snow_summary('SNOW',10000.0)
In [ ]:
monthRange = [1,12]
yearRange = [2003,2016]
path="H:/GIS/SNODAS.gdb/"
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
for y in range(yearRange[0], yearRange[1]+1): #set years converted here
    for m in range(monthRange[0], monthRange[1]+1): #set months converted here
        my = str(y)+str(m).zfill(2)
        newdn = 'TPPT' + my
        try:
            # total precipitation = snow + rain for the month
            calc = Plus('SNOW'+ my +'SUM', 'RAIN'+ my +'SUM')
            calc.save(newdn+'SUM')
            print(newdn)
        except RuntimeError:
            pass
SWE = snow-water equivalent; this is a measure of the estimated total water content of the snow pack.
The median is used to summarize these data.
Units are in meters of water.
In [ ]:
UBM.snow_summary('SWEQ', 1000.0, 'MEDIAN', monthRange=[1, 12], yearRange=[2003, 2016])
SNML = Snow Melt Runoff at the Base of the Snow Pack; cumulative monthly snowmelt calculated by summing daily snowmelt data.
Units are meters of water per month.
In [ ]:
UBM.snow_summary('SNML', 100000.0, 'SUM', monthRange=[1, 12], yearRange=[2003, 2015])
BSSB = Cumulative monthly blowing snow sublimation.
SPSB = Cumulative monthly snow pack sublimation.
TSSB = Total cumulative monthly snow sublimation (BSSB + SPSB).
Units are meters of water per month.
In [ ]:
UBM.snow_summary('BSSB', 100000.0, 'SUM', monthRange=[9, 9], yearRange=[2016, 2016])
In [ ]:
UBM.snow_summary('SPSB', 100000.0, 'SUM', monthRange=[9, 9], yearRange=[2016, 2016])
In [ ]:
path="H:/GIS/SNODAS.gdb/"
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
monthRange = [1,12]
yearRange = [2003,2016]
for y in range(yearRange[0], yearRange[1]+1): #set years converted here
    for m in range(monthRange[0], monthRange[1]+1): #set months converted here
        my = str(y)+str(m).zfill(2)
        newdn = 'TSSB' + my
        try:
            # total sublimation = blowing snow + snow pack sublimation
            calc = Plus('BSSB'+ my +'SUM', 'SPSB'+ my +'SUM')
            calc.save(newdn+'SUM')
            print(newdn)
        except RuntimeError:
            pass
In [ ]:
UBM.totalavg('TSSB')
In [ ]:
UBM.totalavg('SNML', monthRange=[2, 12], yearRange=[2003, 2016])
In [ ]:
soildir = 'H:/GIS/soils/STATSGO/'
keeplist = ['hzname', 'hzdept_r','hzdepb_r','hzthk_r','dbovendry_r','ksat_r','awc_r','wthirdbar_r',
'wfifteenbar_r','mukey','cokey','chkey']
chorizon = pd.read_excel(soildir+'chorizon.xlsx')
droplist = list(set(list(chorizon.columns)) - set(keeplist))
chorizon.drop(droplist, axis=1, inplace=True)
In [ ]:
chorizon.columns = [str(x).strip() for x in list(chorizon.columns)]
In [ ]:
chorizon.columns
Get the thickness of all measured horizons and calculate a thickness-based weighting factor for each horizon.
In [ ]:
chorizon['hzthk_r'] = chorizon['hzdepb_r'] - chorizon['hzdept_r']
thick = chorizon.groupby(['cokey'])['hzthk_r'].sum().to_dict()
chorizon['comthk'] = chorizon['cokey'].apply(lambda x: thick[x],1)
chorizon['thickness_weight'] = chorizon['hzthk_r']/chorizon['comthk']
Calculate the thickness-weighted value of each important horizon variable.
In [ ]:
chorizon['tw_ksat'] = chorizon[['thickness_weight','ksat_r']].apply(lambda x: x[0]*x[1],1)
chorizon['tw_wilt'] = chorizon[['thickness_weight','wfifteenbar_r']].apply(lambda x: x[0]*x[1],1)
chorizon['tw_field_capacity'] = chorizon[['thickness_weight','wthirdbar_r']].apply(lambda x: x[0]*x[1],1)
chorizon['tw_dense'] = chorizon[['thickness_weight','dbovendry_r']].apply(lambda x: x[0]*x[1],1)
Combine horizons into components. Drop NA values and unneeded fields.
In [ ]:
component_props = chorizon.groupby('cokey').sum()
component_props.drop(['hzdept_r','hzdepb_r','dbovendry_r','ksat_r','awc_r','wthirdbar_r','wfifteenbar_r'],axis=1,inplace=True)
In [ ]:
component_props.dropna(subset=['tw_ksat','tw_wilt','tw_field_capacity','tw_dense'],how='all',inplace=True)
In [ ]:
component_props.reset_index(inplace=True)
Read in component table. Keep only keys and component percent field.
In [ ]:
component = pd.read_excel(soildir+'component.xlsx')
droplist = list(set(list(component.columns))-set(['comppct_r','compname','mukey','cokey']))
component.drop(droplist, axis=1, inplace=True)
In [ ]:
com_props = pd.merge(component_props, component, on='cokey')
In [ ]:
corest = pd.read_excel('H:/GIS/soils/STATSGO/corestrictions.xlsx')
corest.drop([u'reskind', u'reshard', u'resdept_l', u'resdept_h', u'resdepb_l', u'resdepb_r', u'resdepb_h',
u'resthk_l', u'resthk_r', u'resthk_h', u'corestrictkey'], inplace=True, axis=1)
In [ ]:
comprops = pd.merge(corest,com_props, on='cokey')
Combine components for each mukey.
In [ ]:
cowt = comprops.groupby(['mukey'])['comppct_r'].sum().to_dict()
comprops['comppct_tot'] = comprops['mukey'].apply(lambda x: cowt[x],1)
comprops['comppct_r'] = comprops['comppct_r'] / comprops['comppct_tot']
comprops['thickness_m'] = comprops['comppct_r']*comprops['resdept_r']/100.0 # convert to meters
comprops['ksat'] = comprops['tw_ksat'] * comprops['comppct_r']
comprops['wilt'] = comprops['tw_wilt'] * comprops['comppct_r']
comprops['field_cap'] = comprops['tw_field_capacity'] * comprops['comppct_r']
comprops['dense'] = comprops['tw_dense'] * comprops['comppct_r']
In [ ]:
muprops = comprops.groupby(['mukey']).sum()
In [ ]:
dropfields = [u'cokey', u'hzthk_r', u'chkey', u'comthk', u'tw_ksat', u'tw_wilt',
u'tw_field_capacity', u'tw_dense', u'thickness_weight','comppct_tot']
muprops.drop(dropfields,axis=1,inplace=True)
In [ ]:
muprops['porosity'] = 1 - (muprops['dense']/2.65) # bulk density / quartz particle density (2.65 g/cm3) = fraction solids; e.g., 1.5/2.65 gives porosity of ~0.43
muprops['max_soil_cap_m'] = muprops['thickness_m']*muprops['porosity'] # meters of water
muprops['field_cap_m'] = muprops['max_soil_cap_m']*muprops['field_cap']
muprops['wilt_m'] = muprops['max_soil_cap_m']*muprops['wilt']
In [ ]:
muprops.drop(['resdept_r','comppct_r','dense'],axis=1,inplace=True)
In [ ]:
muprops.index = muprops.index.astype('str')
In [ ]:
muprops.to_pickle('H:/GIS/soils/STATSGO/muprops.pickle')
In [ ]:
muprops = pd.read_pickle('H:/GIS/soils/STATSGO/muprops.pickle')
In [ ]:
muprops
In [ ]:
path = 'H:/GIS/soils/STATSGO/STATSGO.gdb'
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
fc = 'H:/GIS/soils/STATSGO/STATSGO.gdb/gsmsoilmu_hucs'
narray = muprops.to_records()
arcpy.da.ExtendTable(fc, "MUKEY", narray, "mukey", append_only=False)
In [ ]:
path = 'H:/GIS/soils/STATSGO/STATSGO.gdb'
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
convfields = ['thickness_m','ksat','wilt_m','porosity','max_soil_cap_m','field_cap_m']
for field in convfields:
    arcpy.PolygonToRaster_conversion(fc, field, field, 'CELL_CENTER', '', 100)
In [ ]:
path = 'H:/GIS/soils/SSURGO/gssurgo_g_ut.gdb'
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
def arc_to_df(table, fieldlist):
    # read an arcpy table into a pandas DataFrame
    return pd.DataFrame(arcpy.da.TableToNumPyArray(table, fieldlist, null_value=np.nan))
In [ ]:
keeplist = ['hzname', 'hzdept_r','hzdepb_r','hzthk_r','dbovendry_r','ksat_r','awc_r','wthirdbar_r',
'wfifteenbar_r','cokey','chkey']
chorizon = arc_to_df('chorizon',keeplist)
In [ ]:
chorizon['hzthk_r'] = chorizon['hzdepb_r'] - chorizon['hzdept_r']
thick = chorizon.groupby(['cokey'])['hzthk_r'].sum().to_dict()
chorizon['comthk'] = chorizon['cokey'].apply(lambda x: thick[x],1)
chorizon['thickness_weight'] = chorizon['hzthk_r']/chorizon['comthk']
In [ ]:
chorizon['tw_ksat'] = chorizon[['thickness_weight','ksat_r']].apply(lambda x: x[0]*x[1],1)
chorizon['tw_wilt'] = chorizon[['thickness_weight','wfifteenbar_r']].apply(lambda x: x[0]*x[1],1)
chorizon['tw_field_capacity'] = chorizon[['thickness_weight','wthirdbar_r']].apply(lambda x: x[0]*x[1],1)
chorizon['tw_dense'] = chorizon[['thickness_weight','dbovendry_r']].apply(lambda x: x[0]*x[1],1)
In [ ]:
component_props = chorizon.groupby('cokey').sum()
component_props.drop(['hzdept_r','hzdepb_r','dbovendry_r','ksat_r','awc_r','wthirdbar_r','wfifteenbar_r'],axis=1,inplace=True)
In [ ]:
component_props.dropna(subset=['tw_ksat','tw_wilt','tw_field_capacity','tw_dense'],how='all',inplace=True)
In [ ]:
component_props.reset_index(inplace=True)
In [ ]:
component = arc_to_df('component',['comppct_r','compname','mukey','cokey'])
In [ ]:
com_props = pd.merge(component_props, component, on='cokey')
In [ ]:
fields = arcpy.ListFields('corestrictions')
fieldlist = [field.name for field in fields]
droplist = [u'reskind', u'reshard', u'resdept_l', u'resdept_h', u'resdepb_l', u'resdepb_r', u'resdepb_h',
u'resthk_l', u'resthk_r', u'resthk_h', u'corestrictkey']
keepfields = list(set(fieldlist)-set(droplist))
corest = arc_to_df('corestrictions',keepfields)
In [ ]:
comprops = pd.merge(corest,com_props, on='cokey')
In [ ]:
cowt = comprops.groupby(['mukey'])['comppct_r'].sum().to_dict()
comprops['comppct_tot'] = comprops['mukey'].apply(lambda x: cowt[x],1)
comprops['comppct_r'] = comprops['comppct_r'] / comprops['comppct_tot']
comprops['thickness_m'] = comprops['comppct_r']*comprops['resdept_r']/100.0 # convert to meters
comprops['ksat'] = comprops['tw_ksat'] * comprops['comppct_r']
comprops['wilt'] = comprops['tw_wilt'] * comprops['comppct_r']
comprops['field_cap'] = comprops['tw_field_capacity'] * comprops['comppct_r']
comprops['dense'] = comprops['tw_dense'] * comprops['comppct_r']
In [ ]:
muprops = comprops.groupby(['mukey']).sum()
In [ ]:
dropfields = [u'hzthk_r', u'comthk', u'tw_ksat', u'tw_wilt',
u'tw_field_capacity', u'tw_dense', u'thickness_weight','comppct_tot']
muprops.drop(dropfields,axis=1,inplace=True)
In [ ]:
muprops['porosity'] = 1 - (muprops['dense']/2.65) # bulk density of material / density of quartz = percent solids
muprops['max_soil_cap_m'] = muprops['thickness_m']*muprops['porosity'] # meters of water
muprops['field_cap_m'] = muprops['max_soil_cap_m']*muprops['field_cap']
muprops['wilt_m'] = muprops['max_soil_cap_m']*muprops['wilt']
In [ ]:
muprops.drop(['resdept_r','comppct_r','dense'],axis=1,inplace=True)
In [ ]:
muprops.index = muprops.index.astype('str')
In [ ]:
muprops.to_pickle('H:/GIS/soils/SSURGO/muprops.pickle')
In [ ]:
muprops = pd.read_pickle('H:/GIS/soils/SSURGO/muprops.pickle')
In [ ]:
muprops.drop(['OBJECTID'],axis=1,inplace=True)
In [ ]:
fc = 'H:/GIS/soils/SSURGO/gssurgo_g_ut.gdb/gmupoly'
narray = muprops.to_records()
arcpy.da.ExtendTable(fc, "MUKEY", narray, "mukey", append_only=False)
In [ ]:
path = 'H:/GIS/soils/SSURGO/ssurgo.gdb'
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
fc = 'gmupoly'
convfields = ['thickness_m','ksat','wilt_m','porosity','max_soil_cap_m','field_cap_m']
for field in convfields:
    arcpy.PolygonToRaster_conversion(fc, field, field, 'CELL_CENTER', '', 100)
In [ ]:
convfields = ['thickness_m','ksat','wilt_m','porosity','max_soil_cap_m','field_cap_m']
path1 = 'H:/GIS/soils/SSURGO/ssurgo.gdb/'
path2 = 'H:/GIS/soils/STATSGO/STATSGO.gdb/'
for field in convfields:
    # prefer the finer SSURGO raster; fall back to STATSGO where SSURGO is null
    rfilled = arcpy.sa.Con(arcpy.sa.IsNull(path1+field), path2+field, path1+field)
    rfilled.save('H:/GIS/Soils.gdb/'+field)
PRISM Data http://www.prism.oregonstate.edu/
In [ ]:
monthRange = [1,12]
fileplace = 'H:/GIS/PRISM/PRISM'
for m in range(monthRange[0], monthRange[1]+1):
    testfile = urllib.URLopener()
    testfile.retrieve("http://services.nacse.org/prism/data/public/normals/800m/ppt/"+ str(m).zfill(2),
                      fileplace+str(m).zfill(2)+'.zip')
In [ ]:
testfile = urllib.URLopener()
# PRISM uses '14' for the annual normal
testfile.retrieve("http://services.nacse.org/prism/data/public/normals/800m/ppt/"+ '14',
                  fileplace+'14'+'.zip')
In [ ]:
fileplace = 'H:/GIS/PRISM/PRISM/'
In [ ]:
def unzipper(filepath):
    # extract the zip archive next to itself; [:-6] strips the 'NN.zip' suffix
    f = zipfile.ZipFile(filepath, 'r')
    f.extractall(filepath[:-6])
In [ ]:
for files in glob.glob(fileplace+'*.zip'):
    unzipper(files)
    print(files)
Calculates monthly available water:
$$Available\;water = Rain + Snowmelt$$
In [ ]:
path="H:/GIS/SNODAS.gdb/"
path2 = "U:/GWP/Groundwater/Projects/BCM/Data/AvailableWater2.gdb/"
def calc_avail_water(path, path2, months='', years=''):
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if months == '':
        months = [1,12]
    if years == '':
        years = [2004,2014]
    for y in range(years[0], years[1]+1): #set years converted here
        for m in range(months[0], months[1]+1): #set months converted here
            my = str(y)+str(m).zfill(2)
            newdn = 'AVWT' + my
            rain = Raster('RAIN'+ my +'SUM')
            melt = Raster('SNML'+ my +'SUM')
            calc = rain + melt # available water = rain + snowmelt
            calc.save(path2+newdn)
            print(newdn)
calc_avail_water(path, path2)
The calculation of excess water provides the water that is available for watershed hydrology. Available water occupies the soil profile, where water will become actual evapotranspiration, and may result in runoff or recharge, depending on the permeability of the underlying bedrock. Total soil-water storage is calculated as porosity multiplied by soil depth. Field capacity (soil water volume at -0.3 megapascals [MPa]) is the soil water volume below which drainage is negligible, and wilting point (soil water volume at -1.5 MPa) is the soil water volume below which actual evapotranspiration does not occur (Hillel 1980).
Once available water is calculated, it may exceed total soil storage and become runoff, or it may be less than total soil storage but greater than field capacity and become recharge. Anything less than field capacity will be calculated as actual evapotranspiration at the rate of PET for that month until it reaches wilting point. When soil water is less than total soil storage and greater than field capacity, soil water greater than field capacity equals recharge. If recharge is greater than bedrock permeability (K), then recharge = K and the excess becomes runoff; otherwise it will recharge at K until field capacity. Runoff and recharge combine to calculate basin discharge, and actual evapotranspiration is subtracted from PET to calculate the climate water deficit.
Equation 1 ($W > S_{tot}$): $$Recharge = \min(S_{tot} - S_{fc},\,K),\quad Runoff = (W - S_{tot}) + \max(S_{tot} - S_{fc} - K,\,0),\quad AET = PET$$
Equation 2 ($S_{fc} < W \leq S_{tot}$): $$Recharge = \min(W - S_{fc},\,K),\quad Runoff = \max(W - S_{fc} - K,\,0),\quad AET = PET$$
Equation 3 ($S_{wp} < W \leq S_{fc}$): $$Recharge = 0,\quad Runoff = 0,\quad AET = PET$$
Equation 4 ($W \leq S_{wp}$): $$Recharge = 0,\quad Runoff = 0,\quad AET = 0$$
where $W$ is current soil water, $S_{tot}$ is total soil-water storage, $S_{fc}$ is field capacity, $S_{wp}$ is wilting point, and $K$ is bedrock permeability, as implemented in the Con statements below.
In [ ]:
from arcpy import Raster
In [ ]:
monthRange = [1,12]
yearRange = [2004,2014]
path = "U:/GWP/Groundwater/Projects/BCM/Data/"
field_cap = Raster(path + "Soil.gdb/fieldCap")
wilt_point = Raster(path + "Soil.gdb/WiltPoint")
T_soil_water = Raster(path + "Soil.gdb/Tsoilwater")
geol_k = Raster(path + "Soil.gdb/Geol_K")
#geol_k = Raster(path + "Soil.gdb/BMC_K")
avail_water = "U:/GWP/Groundwater/Projects/BCM/Data/AvailableWater2.gdb/"
pet = path+'MODIS16.gdb/'
results = "U:/GWP/Groundwater/Projects/BCM/Data/Results.gdb/"
def UBM_calc(results, field_cap, wilt_point, T_soil_water, geol_k, avail_water, pet, months='', years=''):
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if months == '':
        months = [1,12]
    if years == '':
        years = [2004,2014]
    av_soil_water = field_cap # initialize the soil water store at field capacity
    for y in range(years[0], years[1]+1): #set years converted here
        for m in range(months[0], months[1]+1): #set months converted here
            my = str(y) + str(m).zfill(2)
            av_wtr = Raster(avail_water+'AVWT'+ my)
            pet_rast = Raster(pet + 'PET'+ my)
            av_soil_water = av_wtr + av_soil_water
            #Eq 1
            av_recharge1 = "T_soil_water - field_cap"
            recharge1 = "Con(eval(av_recharge1) > geol_k, geol_k, eval(av_recharge1))"
            runoff1 = "(av_soil_water - T_soil_water) + Con(eval(av_recharge1) > geol_k, eval(av_recharge1) - geol_k, 0)"
            #Eq 2
            av_recharge2 = "av_soil_water - field_cap"
            recharge2 = "Con(eval(av_recharge2) > geol_k, geol_k, eval(av_recharge2))"
            runoff2 = "Con(eval(av_recharge2) > geol_k, eval(av_recharge2) - geol_k, 0)"
            #Eq 3: recharge3 = 0, runoff3 = 0, aet = pet_rast
            #Eq 4: recharge4 = 0, runoff4 = 0, aet = 0
            #Order of if/then is Eq 1, Eq 4, Eq 2, Eq 3
            recharge = Con(av_soil_water > T_soil_water, eval(recharge1),
                           Con(av_soil_water < wilt_point, 0,
                               Con((av_soil_water < T_soil_water) & (av_soil_water > field_cap), eval(recharge2),
                                   Con((av_soil_water > wilt_point) & (av_soil_water < field_cap), 0, 0))))
            recharge.save(results + 'rec' + my)
            runoff = Con(av_soil_water > T_soil_water, eval(runoff1),
                         Con(av_soil_water < wilt_point, 0,
                             Con((av_soil_water < T_soil_water) & (av_soil_water > field_cap), eval(runoff2),
                                 Con((av_soil_water > wilt_point) & (av_soil_water < field_cap), 0, 0))))
            runoff.save(results + 'run' + my)
            aet = Con(av_soil_water > T_soil_water, pet_rast,
                      Con(av_soil_water < wilt_point, 0,
                          Con((av_soil_water < T_soil_water) & (av_soil_water > field_cap), pet_rast,
                              Con((av_soil_water > wilt_point) & (av_soil_water < field_cap), pet_rast, 0))))
            aet.save(results + 'aet' + my)
            # carry the remaining soil water into the next month (never negative)
            av_soil_water = av_soil_water - runoff - recharge - aet
            av_soil_water = Con(av_soil_water > 0.0, av_soil_water, 0.0)
            av_soil_water.save(results + 'asw' + my)
            print(my)
In [ ]:
from arcpy import env
env.overwriteOutput = True
env.workspace = "C:/Users/PAULINKENBRANDT/AppData/Roaming/ESRI/Desktop10.4/ArcCatalog/AGRC_SGID.sde"
In [ ]:
def monthly_to_yearly(path, code, yearRange='', statistics='SUM'):
    if yearRange == '':
        yearRange = [2004,2014]
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    for y in range(yearRange[0], yearRange[1]+1): #set years converted here
        ylist = arcpy.ListRasters(code+str(y)+"*") #pick all monthly rasters of this data type for the year
        calc = CellStatistics(ylist, statistics_type=statistics, ignore_nodata="DATA")
        outnm = 'y'+code+str(y)
        print(outnm)
        calc.save(outnm)
In [ ]:
def summarize(path, code, statistics='MEAN'):
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    rlist = arcpy.ListRasters(code+"*") #pick all rasters of this data type
    # summarize the stack of rasters into a single raster
    calc = CellStatistics(rlist, statistics_type=statistics, ignore_nodata="DATA")
    outnm = code+"_"+statistics
    calc.save(outnm)
    print(outnm)
In [ ]:
path = 'H:/GIS/Results.gdb'
code = 'rec'
monthly_to_yearly(path,code)
#summarize(path,code)
In [ ]:
code = 'yrec'
summarize(path,code)
In [ ]:
path = 'H:/GIS/Results.gdb'
code = 'run'
monthly_to_yearly(path,code)
code = 'yrun'
summarize(path,code)